Network backend/frontend driver fixes: add net_open/net_close handlers gated on connection state (new `active` flag), switch to netdev_priv(), register the device and change connection state under rtnl_lock, and take a reference on each skb queued for receive processing.
/* Miscellaneous private stuff. */
enum { DISCONNECTED, DISCONNECTING, CONNECTED } status;
+ int active;
/*
* DISCONNECT response is deferred until pending requests are ack'ed.
* We therefore need to store the id from the original request.
struct netif_st *hash_next;
struct list_head list; /* scheduling list */
atomic_t refcnt;
- spinlock_t rx_lock, tx_lock;
struct net_device *dev;
struct net_device_stats stats;
void netif_interface_init(void);
void netif_ctrlif_init(void);
-void netif_deschedule(netif_t *netif);
+void netif_schedule_work(netif_t *netif);
+void netif_deschedule_work(netif_t *netif);
int netif_be_start_xmit(struct sk_buff *skb, struct net_device *dev);
struct net_device_stats *netif_be_get_stats(struct net_device *dev);
return netif;
}
+/*
+ * Mark the backend interface live and start servicing it.
+ * dev->xmit_lock is taken while flipping 'active' so the transmit path
+ * (netif_be_start_xmit tests netif->active) sees a consistent value.
+ * NOTE(review): the request_irq() return value is deliberately ignored
+ * -- confirm failure is impossible/benign for this event-channel IRQ.
+ * Callers run under rtnl_lock (net_open / the connect path).
+ */
+static void __netif_up(netif_t *netif)
+{
+ struct net_device *dev = netif->dev;
+ spin_lock_bh(&dev->xmit_lock);
+ netif->active = 1;
+ spin_unlock_bh(&dev->xmit_lock);
+ /* Bind the event-channel IRQ, then kick any already-pending TX work. */
+ (void)request_irq(netif->irq, netif_be_int, 0, dev->name, netif);
+ netif_schedule_work(netif);
+}
+
+/*
+ * Quiesce the backend interface: clear 'active' under dev->xmit_lock
+ * (serialising against netif_be_start_xmit, which tests the flag),
+ * release the event-channel IRQ, and pull the interface off the TX
+ * scheduling list. Mirror image of __netif_up(); callers run under
+ * rtnl_lock (net_close / the disconnect path).
+ */
+static void __netif_down(netif_t *netif)
+{
+ struct net_device *dev = netif->dev;
+ spin_lock_bh(&dev->xmit_lock);
+ netif->active = 0;
+ spin_unlock_bh(&dev->xmit_lock);
+ free_irq(netif->irq, netif);
+ netif_deschedule_work(netif);
+}
+
+/*
+ * dev->open handler for the backend device. The interface is only
+ * brought fully up once the control channel has reached CONNECTED;
+ * otherwise we just enable the queue and wait for the connect message.
+ */
+static int net_open(struct net_device *dev)
+{
+ netif_t *priv = netdev_priv(dev);
+
+ if ( priv->status == CONNECTED )
+ __netif_up(priv);
+ netif_start_queue(dev);
+ return 0;
+}
+
+/*
+ * dev->stop handler for the backend device: stop the queue first, then
+ * tear down the live interface state if we were connected.
+ */
+static int net_close(struct net_device *dev)
+{
+ netif_t *priv = netdev_priv(dev);
+
+ netif_stop_queue(dev);
+ if ( priv->status == CONNECTED )
+ __netif_down(priv);
+ return 0;
+}
+
static void __netif_disconnect_complete(void *arg)
{
netif_t *netif = (netif_t *)arg;
*/
unbind_evtchn_from_irq(netif->evtchn);
vfree(netif->tx); /* Frees netif->rx as well. */
- rtnl_lock();
- (void)dev_close(netif->dev);
- rtnl_unlock();
/* Construct the deferred response message. */
cmsg.type = CMSG_NETIF_BE;
return;
}
- netif = dev->priv;
+ netif = netdev_priv(dev);
memset(netif, 0, sizeof(*netif));
netif->domid = domid;
netif->handle = handle;
netif->status = DISCONNECTED;
- spin_lock_init(&netif->rx_lock);
- spin_lock_init(&netif->tx_lock);
atomic_set(&netif->refcnt, 0);
netif->dev = dev;
dev->hard_start_xmit = netif_be_start_xmit;
dev->get_stats = netif_be_get_stats;
+ dev->open = net_open;
+ dev->stop = net_close;
/* Disable queuing. */
dev->tx_queue_len = 0;
memset(dev->dev_addr, 0xFF, ETH_ALEN);
dev->dev_addr[0] &= ~0x01;
- if ( (err = register_netdev(dev)) != 0 )
+ rtnl_lock();
+ err = register_netdevice(dev);
+ rtnl_unlock();
+
+ if ( err != 0 )
{
DPRINTK("Could not register new net device %s: err=%d\n",
dev->name, err);
(netif_tx_interface_t *)vma->addr;
netif->rx =
(netif_rx_interface_t *)((char *)vma->addr + PAGE_SIZE);
- netif->status = CONNECTED;
- netif_get(netif);
-
netif->tx->resp_prod = netif->rx->resp_prod = 0;
+ netif_get(netif);
+ wmb(); /* Other CPUs see new state before interface is started. */
rtnl_lock();
- (void)dev_open(netif->dev);
+ netif->status = CONNECTED;
+ wmb();
+ if ( netif_running(netif->dev) )
+ __netif_up(netif);
rtnl_unlock();
- (void)request_irq(netif->irq, netif_be_int, 0, netif->dev->name, netif);
- netif_start_queue(netif->dev);
-
connect->status = NETIF_BE_STATUS_OKAY;
}
if ( netif->status == CONNECTED )
{
+ rtnl_lock();
netif->status = DISCONNECTING;
netif->disconnect_rspid = rsp_id;
- wmb(); /* Let other CPUs see the status change. */
- netif_stop_queue(netif->dev);
- free_irq(netif->irq, netif);
- netif_deschedule(netif);
+ wmb();
+ if ( netif_running(netif->dev) )
+ __netif_down(netif);
+ rtnl_unlock();
netif_put(netif);
return 0; /* Caller should not send response message. */
}
/* Freed TX SKBs get batched on this ring before return to pending_ring. */
static u16 dealloc_ring[MAX_PENDING_REQS];
-static spinlock_t dealloc_lock = SPIN_LOCK_UNLOCKED;
static PEND_RING_IDX dealloc_prod, dealloc_cons;
static struct sk_buff_head tx_queue;
int netif_be_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
- netif_t *netif = (netif_t *)dev->priv;
+ netif_t *netif = netdev_priv(dev);
ASSERT(skb->dev == dev);
/* Drop the packet if the target domain has no receive buffers. */
- if ( (netif->rx_req_cons == netif->rx->req_prod) ||
+ if ( !netif->active ||
+ (netif->rx_req_cons == netif->rx->req_prod) ||
((netif->rx_req_cons-netif->rx_resp_prod) == NETIF_RX_RING_SIZE) )
goto drop;
}
netif->rx_req_cons++;
+ netif_get(netif);
skb_queue_tail(&rx_queue, skb);
tasklet_schedule(&net_rx_tasklet);
mmu = rx_mmu;
while ( (skb = skb_dequeue(&rx_queue)) != NULL )
{
- netif = (netif_t *)skb->dev->priv;
+ netif = netdev_priv(skb->dev);
vdata = (unsigned long)skb->data;
mdata = virt_to_machine(vdata);
if ( net_ratelimit() )
printk(KERN_WARNING "Memory squeeze in netback driver.\n");
mod_timer(&net_timer, jiffies + HZ);
+ skb_queue_head(&rx_queue, skb);
break;
}
mmu = rx_mmu;
while ( (skb = __skb_dequeue(&rxq)) != NULL )
{
- netif = (netif_t *)skb->dev->priv;
+ netif = netdev_priv(skb->dev);
size = skb->tail - skb->data;
/* Rederive the machine addresses. */
notify_list[notify_nr++] = evtchn;
}
+ netif_put(netif);
dev_kfree_skb(skb);
mcl += 2;
+/* dev->get_stats hook: return the statistics embedded in our private data. */
struct net_device_stats *netif_be_get_stats(struct net_device *dev)
{
- netif_t *netif = dev->priv;
+ netif_t *netif = netdev_priv(dev);
return &netif->stats;
}
return;
spin_lock_irq(&net_schedule_list_lock);
- if ( !__on_net_schedule_list(netif) && (netif->status == CONNECTED) )
+ if ( !__on_net_schedule_list(netif) && netif->active )
{
list_add_tail(&netif->list, &net_schedule_list);
netif_get(netif);
spin_unlock_irq(&net_schedule_list_lock);
}
-static inline void netif_schedule_work(netif_t *netif)
+void netif_schedule_work(netif_t *netif)
{
if ( (netif->tx_req_cons != netif->tx->req_prod) &&
((netif->tx_req_cons-netif->tx_resp_prod) != NETIF_TX_RING_SIZE) )
}
}
-void netif_deschedule(netif_t *netif)
+/* Remove 'netif' from the TX scheduling list (renamed from
+ * netif_deschedule to pair with netif_schedule_work). */
+void netif_deschedule_work(netif_t *netif)
{
remove_from_net_schedule_list(netif);
}
netif = pending_tx_info[pending_idx].netif;
- spin_lock(&netif->tx_lock);
make_tx_response(netif, pending_tx_info[pending_idx].req.id,
NETIF_RSP_OKAY);
- spin_unlock(&netif->tx_lock);
pending_ring[MASK_PEND_IDX(pending_prod++)] = pending_idx;
static void netif_idx_release(u16 pending_idx)
{
+ /*
+ * Lock is now function-local (replaces the file-scope dealloc_lock):
+ * its only job is to serialise concurrent producers onto
+ * dealloc_ring/dealloc_prod, and nothing outside this function
+ * touches the producer side.
+ */
+ static spinlock_t _lock = SPIN_LOCK_UNLOCKED;
unsigned long flags;
- spin_lock_irqsave(&dealloc_lock, flags);
+ spin_lock_irqsave(&_lock, flags);
dealloc_ring[MASK_PEND_IDX(dealloc_prod++)] = pending_idx;
- spin_unlock_irqrestore(&dealloc_lock, flags);
+ spin_unlock_irqrestore(&_lock, flags);
+ /* Actual consumption of dealloc_ring happens in the TX tasklet. */
tasklet_schedule(&net_tx_tasklet);
}
netif_idx_release(pending_idx);
}
-#if 0
-long flush_bufs_for_netif(netif_t *netif)
-{
- NETIF_RING_IDX i;
-
- /* Return any outstanding receive buffers to the guest OS. */
- spin_lock(&netif->rx_lock);
- for ( i = netif->rx_req_cons;
- (i != netif->rx->req_prod) &&
- ((i-netif->rx_resp_prod) != NETIF_RX_RING_SIZE);
- i++ )
- {
- make_rx_response(netif,
- netif->rx->ring[MASK_NETIF_RX_IDX(i)].req.id,
- NETIF_RSP_DROPPED, 0, 0);
- }
- netif->rx_req_cons = i;
- spin_unlock(&netif->rx_lock);
-
- /*
- * Flush pending transmit buffers. The guest may still have to wait for
- * buffers that are queued at a physical NIC.
- */
- spin_lock(&netif->tx_lock);
- for ( i = netif->tx_req_cons;
- (i != netif->tx->req_prod) &&
- ((i-netif->tx_resp_prod) != NETIF_TX_RING_SIZE);
- i++ )
- {
- make_tx_response(netif,
- netif->tx->ring[MASK_NETIF_TX_IDX(i)].req.id,
- NETIF_RSP_DROPPED);
- }
- netif->tx_req_cons = i;
- spin_unlock(&netif->tx_lock);
-
- return 0;
-}
-#endif
-
irqreturn_t netif_be_int(int irq, void *dev_id, struct pt_regs *regs)
{
netif_t *netif = dev_id;
static int network_open(struct net_device *dev)
{
- struct net_private *np = dev->priv;
+ struct net_private *np = netdev_priv(dev);
memset(&np->stats, 0, sizeof(np->stats));
{
NETIF_RING_IDX i, prod;
unsigned short id;
- struct net_private *np = dev->priv;
+ struct net_private *np = netdev_priv(dev);
struct sk_buff *skb;
if ( np->backend_state != BEST_CONNECTED )
static void network_alloc_rx_buffers(struct net_device *dev)
{
unsigned short id;
- struct net_private *np = dev->priv;
+ struct net_private *np = netdev_priv(dev);
struct sk_buff *skb;
int i, batch_target;
NETIF_RING_IDX req_prod = np->rx->req_prod;
static int network_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
unsigned short id;
- struct net_private *np = (struct net_private *)dev->priv;
+ struct net_private *np = netdev_priv(dev);
netif_tx_request_t *tx;
NETIF_RING_IDX i;
static irqreturn_t netif_int(int irq, void *dev_id, struct pt_regs *ptregs)
{
struct net_device *dev = dev_id;
- struct net_private *np = dev->priv;
+ struct net_private *np = netdev_priv(dev);
unsigned long flags;
spin_lock_irqsave(&np->tx_lock, flags);
static int netif_poll(struct net_device *dev, int *pbudget)
{
- struct net_private *np = dev->priv;
+ struct net_private *np = netdev_priv(dev);
struct sk_buff *skb, *nskb;
netif_rx_response_t *rx;
NETIF_RING_IDX i, rp;
static int network_close(struct net_device *dev)
{
- struct net_private *np = dev->priv;
+ struct net_private *np = netdev_priv(dev);
np->user_state = UST_CLOSED;
netif_stop_queue(np->dev);
return 0;
+/* Frontend dev->get_stats hook: return the stats kept in net_private. */
static struct net_device_stats *network_get_stats(struct net_device *dev)
{
- struct net_private *np = (struct net_private *)dev->priv;
+ struct net_private *np = netdev_priv(dev);
return &np->stats;
}
int i, requeue_idx;
netif_tx_request_t *tx;
- np = dev->priv;
+ np = netdev_priv(dev);
spin_lock_irq(&np->tx_lock);
spin_lock(&np->rx_lock);
* Allocates tx/rx pages.
* Sends connect message to xend.
*/
-static void vif_disconnect(struct net_private *np){
+static void vif_disconnect(struct net_private *np)
+{
DPRINTK(">\n");
if(np->tx) free_page((unsigned long)np->tx);
if(np->rx) free_page((unsigned long)np->rx);
goto exit;
}
- np = dev->priv;
+ np = netdev_priv(dev);
np->backend_state = BEST_CLOSED;
np->user_state = UST_CLOSED;
np->handle = handle;
exit:
if ( np != NULL )
- *np = ((dev && !err) ? dev->priv : NULL);
+ *np = ((dev && !err) ? netdev_priv(dev) : NULL);
DPRINTK("< err=%d\n", err);
return err;
}
# Add/remove vif to/from bridge.
brctl ${brcmd} ${bridge} ${vif}
+ifconfig ${vif} $OP
if [ ${ip} ] ; then